In [1]:
% reset -f
from __future__ import print_function
from __future__ import division
import math
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import torch
import sys
print('__Python VERSION:', sys.version)
print('__pyTorch VERSION:', torch.__version__)
print('__CUDA VERSION')
from subprocess import call
# call(["nvcc", "--version"]) does not work
! nvcc --version
print('__CUDNN VERSION:', torch.backends.cudnn.version())
print('__Number CUDA Devices:', torch.cuda.device_count())
print('__Devices')
# call(["nvidia-smi", "--format=csv", "--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"])
print('Active CUDA Device: GPU', torch.cuda.current_device())
print ('Available devices ', torch.cuda.device_count())
print ('Current cuda device ', torch.cuda.current_device())
## NumPy and PyTorch interoperability
In [3]:
from __future__ import print_function
import torch
from torch.autograd import Variable
In [4]:
# torch.empty(3, 2) is the modern replacement for the legacy torch.Tensor(3, 2)
# constructor: both allocate an UNINITIALIZED float32 tensor (printed values
# are whatever was in memory).
x = torch.empty(3, 2)
print(type(x))
print(x)
# How Variables work.
# NOTE: torch.autograd.Variable is deprecated since PyTorch 0.4 — Variable(x)
# simply returns a Tensor; .requires_grad and .data now live on Tensor itself.
x = Variable(x)
print("x:" + str(x))
print("requires grad:" + str(x.requires_grad))
print("data:" + str(x.data))
In [5]:
# A 3x4 tensor of uniform random floats in [0, 1).
x = torch.rand(3, 4)
print(type(x))
print(x)
In [6]:
# NumPy-style slicing: every row from index 1 onward (identical to x[1:] for a 2-D tensor).
print(x[1:, :])
In [7]:
# Zero-copy bridge to NumPy: the returned ndarray shares memory with the CPU
# tensor, so mutating one mutates the other. Raises for CUDA tensors — call
# .cpu() first (relevant if the cell below has moved x to the GPU).
x.numpy()
Out[7]:
In [8]:
# Demonstrate GPU execution. Keep the arithmetic OUTSIDE the guard so the value
# of x is the same with or without a GPU — in the original, x was doubled only
# on CUDA hosts, making results hardware-dependent. Only the device transfer is
# conditional.
if torch.cuda.is_available():
    x = x.cuda()
x = x * 2
print(type(x))
print(x)
In [ ]:
In [ ]: